/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "checksum.h"
#include "datapath.h"
#include "flow.h"
#include "genl_exec.h"
#include "vlan.h"
#include "tunnel.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
#error Kernels before 2.6.18 or after 3.8 are not supported by this version of Open vSwitch.
#endif
#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)

static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
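
/* Illustrative note (not part of the original file): REHASH_FLOW_INTERVAL is
 * measured in jiffies, so (10 * 60 * HZ) rearms rehash_flow_wq every ten
 * minutes regardless of the kernel's HZ setting. */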

int ovs_net_id __read_mostly;

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on
 * vports, etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of above and don't interact with
 * each other.
 */
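
/* A minimal sketch of the resulting lock order (illustrative, not from the
 * original source).  A control operation that touches both device and
 * flow-table state nests RTNL inside genl_mutex:
 *
 *	genl_mutex          (taken by the Generic Netlink core per doit())
 *	  -> rtnl_lock()
 *	       ... add/remove vports or datapaths ...
 *	     rtnl_unlock()
 *	genl_mutex released
 */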

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
				  struct sk_buff *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			dp = vport->dp;
	}
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->ops->get_ifindex(local);
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
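
/* Worked example (illustrative; assumes DP_VPORT_HASH_BUCKETS is a power of
 * two such as 1024): port_no 1033 maps to bucket 1033 & 1023 == 9.  The
 * bitwise AND is a cheap substitute for modulo precisely because the bucket
 * count is a power of two. */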

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_node *n;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		int key_len;

		/* Extract flow from 'skb' into 'key'. */
		error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		/* Look up flow. */
		flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
					   &key, key_len);
		if (unlikely(!flow)) {
			struct dp_upcall_info upcall;

			upcall.cmd = OVS_PACKET_CMD_MISS;
			upcall.key = &key;
			upcall.userdata = NULL;
			upcall.portid = p->upcall_portid;
			ovs_dp_upcall(dp, skb, &upcall);
			consume_skb(skb);
			stats_counter = &stats->n_missed;
			goto out;
		}

		OVS_CB(skb)->flow = flow;
	}

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex) {
		err = -ENODEV;
		goto err;
	}

	forward_ip_summed(skb, true);

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}

static int queue_gso_packets(struct net *net, int dp_ifindex,
			     struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

static int queue_userspace_packet(struct net *net, int dp_ifindex,
				  struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	unsigned int len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		err = vlan_deaccel_tag(nskb);
		if (err)
			return err;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	len = sizeof(struct ovs_header);
	len += nla_total_size(skb->len);
	len += nla_total_size(FLOW_BUFSIZE);
	if (upcall_info->userdata)
		len += NLA_ALIGN(upcall_info->userdata->nla_len);

	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	genlmsg_end(user_skb, upcall);
	err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
	kfree_skb(nskb);
	return err;
}
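
/* Worked example of the sizing logic above (illustrative only): for a
 * 64-byte packet with no userdata,
 *
 *	len = sizeof(struct ovs_header)            genlmsg payload header
 *	    + nla_total_size(64)                   OVS_PACKET_ATTR_PACKET
 *	    + nla_total_size(FLOW_BUFSIZE)         OVS_PACKET_ATTR_KEY
 *
 * where nla_total_size(64) = NLA_ALIGN(NLA_HDRLEN + 64) = 68.  Because
 * nla_total_size() pads every attribute to a 4-byte boundary, the buffer
 * from genlmsg_new() is guaranteed to hold each attribute reserved later
 * in the function. */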

/* Called with genl_mutex. */
static int flush_flows(struct datapath *dp)
{
	struct flow_table *old_table;
	struct flow_table *new_table;

	old_table = genl_dereference(dp->table);
	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	ovs_flow_tbl_deferred_destroy(old_table);
	return 0;
}

static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
{
	struct sw_flow_actions *acts;
	int new_acts_size;
	int req_size = NLA_ALIGN(attr_len);
	int next_offset = offsetof(struct sw_flow_actions, actions) +
					(*sfa)->actions_len;

	if (req_size <= (ksize(*sfa) - next_offset))
		goto out;

	new_acts_size = ksize(*sfa) * 2;

	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
			return ERR_PTR(-EMSGSIZE);
		new_acts_size = MAX_ACTIONS_BUFSIZE;
	}

	acts = ovs_flow_actions_alloc(new_acts_size);
	if (IS_ERR(acts))
		return (void *)acts;

	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
	acts->actions_len = (*sfa)->actions_len;
	kfree(*sfa);
	*sfa = acts;

out:
	(*sfa)->actions_len += req_size;
	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}
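
/* Note on the growth policy above (illustrative, not from the original
 * source): because ksize() reports the real slab allocation size, doubling
 * from there amortizes repeated add_action() calls.  For example, an action
 * list that has outgrown a 256-byte slab object is regrown at 512 bytes,
 * then 1024, and so on up to the MAX_ACTIONS_BUFSIZE cap, past which any
 * request that still does not fit fails with -EMSGSIZE. */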

static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
{
	struct nlattr *a;

	a = reserve_sfa_size(sfa, nla_attr_size(len));
	if (IS_ERR(a))
		return PTR_ERR(a);

	a->nla_type = attrtype;
	a->nla_len = nla_attr_size(len);

	if (data)
		memcpy(nla_data(a), data, len);
	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

	return 0;
}

static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
{
	int used = (*sfa)->actions_len;
	int err;

	err = add_action(sfa, attrtype, NULL, 0);
	if (err)
		return err;

	return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
{
	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);

	a->nla_len = sfa->actions_len - st_offset;
}

static int validate_and_copy_actions(const struct nlattr *attr,
				     const struct sw_flow_key *key, int depth,
				     struct sw_flow_actions **sfa);

static int validate_and_copy_sample(const struct nlattr *attr,
				    const struct sw_flow_key *key, int depth,
				    struct sw_flow_actions **sfa)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem, start, err, st_acts;

	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;

	/* validation done, copy sample action. */
	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
	if (start < 0)
		return start;
	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
	if (err)
		return err;
	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
	if (st_acts < 0)
		return st_acts;

	err = validate_and_copy_actions(actions, key, depth + 1, sfa);
	if (err)
		return err;

	add_nested_action_end(*sfa, st_acts);
	add_nested_action_end(*sfa, start);

	return 0;
}

static int validate_tp_port(const struct sw_flow_key *flow_key)
{
	if (flow_key->eth.type == htons(ETH_P_IP)) {
		if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
			return 0;
	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
		if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
			return 0;
	}

	return -EINVAL;
}

static int validate_and_copy_set_tun(const struct nlattr *attr,
				     struct sw_flow_actions **sfa)
{
	struct ovs_key_ipv4_tunnel tun_key;
	int err, start;

	err = ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
	if (err)
		return err;

	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
	if (start < 0)
		return start;

	err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
	add_nested_action_end(*sfa, start);

	return err;
}

static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key,
			struct sw_flow_actions **sfa,
			bool *set_tun)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);

	/* There can be only one key in a action */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

	if (key_type > OVS_KEY_ATTR_MAX ||
	    (ovs_key_lens[key_type] != nla_len(ovs_key) &&
	     ovs_key_lens[key_type] != -1))
		return -EINVAL;

	switch (key_type) {
	const struct ovs_key_ipv4 *ipv4_key;
	const struct ovs_key_ipv6 *ipv6_key;
	int err;

	case OVS_KEY_ATTR_PRIORITY:
	case OVS_KEY_ATTR_TUN_ID:
	case OVS_KEY_ATTR_ETHERNET:
		break;

	case OVS_KEY_ATTR_SKB_MARK:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && !defined(CONFIG_NETFILTER)
		if (nla_get_u32(ovs_key) != 0)
			return -EINVAL;
#endif
		break;

	case OVS_KEY_ATTR_TUNNEL:
		*set_tun = true;
		err = validate_and_copy_set_tun(a, sfa);
		if (err)
			return err;
		break;

	case OVS_KEY_ATTR_IPV4:
		if (flow_key->eth.type != htons(ETH_P_IP))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		ipv4_key = nla_data(ovs_key);
		if (ipv4_key->ipv4_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv4_key->ipv4_frag != flow_key->ip.frag)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_IPV6:
		if (flow_key->eth.type != htons(ETH_P_IPV6))
			return -EINVAL;

		if (!flow_key->ip.proto)
			return -EINVAL;

		ipv6_key = nla_data(ovs_key);
		if (ipv6_key->ipv6_proto != flow_key->ip.proto)
			return -EINVAL;

		if (ipv6_key->ipv6_frag != flow_key->ip.frag)
			return -EINVAL;

		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_TCP:
		if (flow_key->ip.proto != IPPROTO_TCP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	case OVS_KEY_ATTR_UDP:
		if (flow_key->ip.proto != IPPROTO_UDP)
			return -EINVAL;

		return validate_tp_port(flow_key);

	default:
		return -EINVAL;
	}

	return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
	};
	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
	int error;

	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
				 attr, userspace_policy);
	if (error)
		return error;

	if (!a[OVS_USERSPACE_ATTR_PID] ||
	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
		return -EINVAL;

	return 0;
}

static int copy_action(const struct nlattr *from,
		       struct sw_flow_actions **sfa)
{
	int totlen = NLA_ALIGN(from->nla_len);
	struct nlattr *to;

	to = reserve_sfa_size(sfa, from->nla_len);
	if (IS_ERR(to))
		return PTR_ERR(to);

	memcpy(to, from, totlen);
	return 0;
}

static int validate_and_copy_actions(const struct nlattr *attr,
				     const struct sw_flow_key *key,
				     int depth,
				     struct sw_flow_actions **sfa)
{
	const struct nlattr *a;
	int rem, err;

	if (depth >= SAMPLE_ACTION_DEPTH)
		return -EOVERFLOW;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);
		bool skip_copy;

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		skip_copy = false;
		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			vlan = nla_data(a);
			if (vlan->vlan_tpid != htons(ETH_P_8021Q))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_SET:
			err = validate_set(a, key, sfa, &skip_copy);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = validate_and_copy_sample(a, key, depth, sfa);
			if (err)
				return err;
			skip_copy = true;
			break;

		default:
			return -EINVAL;
		}
		if (!skip_copy) {
			err = copy_action(a, sfa);
			if (err)
				return err;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct datapath *dp;
	struct ethhdr *eth;
	int len;
	int err;
	int key_len;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS] ||
	    nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);
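	/* Background note (illustrative, not in the original): 1536 (0x600)
	 * is the Ethernet boundary between IEEE 802.3 length fields and
	 * EtherTypes.  E.g. 0x0800 (IPv4) >= 1536 names a protocol, while
	 * smaller values are frame lengths, hence the ETH_P_802_2 fallback
	 * above. */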

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
	if (err)
		goto err_flow_free;

	err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
	if (err)
		goto err_flow_free;

	acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
	err = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_flow_free;

	err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
	rcu_assign_pointer(flow->sf_acts, acts);
	if (err)
		goto err_flow_free;

	OVS_CB(packet)->flow = flow;
	packet->priority = flow->key.phy.priority;
	skb_set_mark(packet, flow->key.phy.skb_mark);

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	local_bh_disable();
	err = ovs_execute_actions(dp, packet);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
	struct flow_table *table = genl_dereference(dp->table);
	int i;

	stats->n_flows = ovs_flow_tbl_count(table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
	}
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};

static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);

static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
	const struct nlattr *a;
	struct nlattr *start;
	int err = 0, rem;

	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
	if (!start)
		return -EMSGSIZE;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		struct nlattr *st_sample;

		switch (type) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
				return -EMSGSIZE;
			break;
		case OVS_SAMPLE_ATTR_ACTIONS:
			st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
			if (!st_sample)
				return -EMSGSIZE;
			err = actions_to_attr(nla_data(a), nla_len(a), skb);
			if (err)
				return err;
			nla_nest_end(skb, st_sample);
			break;
		}
	}

	nla_nest_end(skb, start);
	return err;
}

static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	struct nlattr *start;
	int err;

	switch (key_type) {
	case OVS_KEY_ATTR_IPV4_TUNNEL:
		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
		if (!start)
			return -EMSGSIZE;

		err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
		if (err)
			return err;
		nla_nest_end(skb, start);
		break;
	default:
		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
			return -EMSGSIZE;
		break;
	}

	return 0;
}

static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
{
	const struct nlattr *a;
	int rem, err;

	nla_for_each_attr(a, attr, len, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_ACTION_ATTR_SET:
			err = set_action_to_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample_action_to_attr(a, skb);
			if (err)
				return err;
			break;
		default:
			if (nla_put(skb, type, nla_len(a), nla_data(a)))
				return -EMSGSIZE;
			break;
		}
	}

	return 0;
}

/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	const struct sw_flow_actions *sf_acts;
	struct nlattr *start;
	struct ovs_flow_stats stats;
	struct ovs_header *ovs_header;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_genl_is_held());

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;
	err = ovs_flow_to_nlattrs(&flow->key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = flow->tcp_flags;
	spin_unlock_bh(&flow->lock);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		goto nla_put_failure;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS,
		    sizeof(struct ovs_flow_stats), &stats))
		goto nla_put_failure;

	if (tcp_flags &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
		goto nla_put_failure;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				goto error;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len)
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
	const struct sw_flow_actions *sf_acts;
	int len;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_genl_is_held());

	/* OVS_FLOW_ATTR_KEY */
	len = nla_total_size(FLOW_BUFSIZE);
	/* OVS_FLOW_ATTR_ACTIONS */
	len += nla_total_size(sf_acts->actions_len);
	/* OVS_FLOW_ATTR_STATS */
	len += nla_total_size(sizeof(struct ovs_flow_stats));
	/* OVS_FLOW_ATTR_TCP_FLAGS */
	len += nla_total_size(1);
	/* OVS_FLOW_ATTR_USED */
	len += nla_total_size(8);

	len += NLMSG_ALIGN(sizeof(struct ovs_header));

	return genlmsg_new(len, GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
					       struct datapath *dp,
					       u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(flow);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);
	return skb;
}

static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply;
	struct datapath *dp;
	struct flow_table *table;
	struct sw_flow_actions *acts = NULL;
	int error;
	int key_len;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY])
		goto error;
	error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error;

		error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
		if (error)
			goto err_kfree;
	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
		error = -EINVAL;
		goto error;
	}

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	error = -ENODEV;
	if (!dp)
		goto err_kfree;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow) {
		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
			goto err_kfree;

		/* Expand table, if necessary, to make room. */
		if (ovs_flow_tbl_need_to_expand(table)) {
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_expand(table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(table);
				table = genl_dereference(dp->table);
			}
		}

		/* Allocate flow. */
		flow = ovs_flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto err_kfree;
		}
		clear_stats(flow);

		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		ovs_flow_tbl_insert(table, flow, &key, key_len);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		error = -EEXIST;
		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto err_kfree;

		/* Update actions. */
		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_genl_is_held());
		rcu_assign_pointer(flow->sf_acts, acts);
		ovs_flow_deferred_free_acts(old_acts);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);

		/* Clear stats. */
		if (a[OVS_FLOW_ATTR_CLEAR]) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}

	if (!IS_ERR(reply))
		genl_notify(reply, genl_info_net(info), info->snd_portid,
			    ovs_dp_flow_multicast_group.id, info->nlhdr,
			    GFP_KERNEL);
	else
		netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
	return 0;

err_kfree:
	kfree(acts);
error:
	return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct flow_table *table;
	int err;
	int key_len;

	if (!a[OVS_FLOW_ATTR_KEY])
		return -EINVAL;
	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (err)
		return err;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow)
		return -ENOENT;

	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
					info->snd_seq, OVS_FLOW_CMD_NEW);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	return genlmsg_reply(reply, info);
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct flow_table *table;
	int err;
	int key_len;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	if (!a[OVS_FLOW_ATTR_KEY])
		return flush_flows(dp);

	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
	if (err)
		return err;

	table = genl_dereference(dp->table);
	flow = ovs_flow_tbl_lookup(table, &key, key_len);
	if (!flow)
		return -ENOENT;

	reply = ovs_flow_cmd_alloc_info(flow);
	if (!reply)
		return -ENOMEM;

	ovs_flow_tbl_remove(table, flow);

	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
	BUG_ON(err < 0);

	ovs_flow_deferred_free(flow);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
	return 0;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	struct flow_table *table;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	table = genl_dereference(dp->table);

	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_next(table, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, dp, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	return skb->len;
}

static struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set,
	},
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
#endif
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	rcu_read_lock();
	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
					     u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}

static int ovs_dp_cmd_validate(struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	return CHECK_NUL_STRING(a[OVS_DP_ATTR_NAME], IFNAMSIZ - 1);
}

/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct net *net,
					struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		rcu_read_lock();
		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
		rcu_read_unlock();
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	err = ovs_dp_cmd_validate(a);
	if (err)
		goto err;

	rtnl_lock();

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock_rtnl;

	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_ports_array;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail(&dp->list_node, &ovs_net->dps);
	rtnl_unlock();

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
err_destroy_ports_array:
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
	release_net(ovs_dp_get_net(dp));
	kfree(dp);
err_unlock_rtnl:
	rtnl_unlock();
err:
	return err;
}

/* Called with genl_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	rtnl_lock();

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *node, *n;

		hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del(&dp->list_node);
	ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));

	/* rtnl_unlock() will wait until all the references to devices that
	 * are pending unregistration have been dropped.  We do it here to
	 * ensure that any internal devices (which contain DP pointers) are
	 * fully destroyed before freeing the datapath.
	 */
	rtnl_unlock();

	call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	err = ovs_dp_cmd_validate(info->attrs);
	if (err)
		return err;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		return err;

	__dp_destroy(dp);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);

	return 0;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	err = ovs_dp_cmd_validate(info->attrs);
	if (err)
		return err;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
				ovs_dp_datapath_multicast_group.id, err);
		return 0;
	}

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);

	return 0;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	err = ovs_dp_cmd_validate(info->attrs);
	if (err)
		return err;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return PTR_ERR(dp);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	return genlmsg_reply(reply, info);
}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}

	cb->args[0] = i;

	return skb->len;
}

static struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
#else
	[OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
#endif
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}

static int ovs_vport_cmd_validate(struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	return CHECK_NUL_STRING(a[OVS_VPORT_ATTR_NAME], IFNAMSIZ - 1);
}

/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_rtnl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}

static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rtnl_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_rtnl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_rtnl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	if (a[OVS_VPORT_ATTR_STATS])
		ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}
	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rtnl_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
		err = -EINVAL;

	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
	if (err)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_STATS])
		ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		netlink_set_err(GENL_SOCK(sock_net(skb->sk)), 0,
				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
		goto exit_unlock;
	}

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rtnl_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;
	ovs_dp_detach_port(vport);

	genl_notify(reply, genl_info_net(info), info->snd_portid,
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
	rtnl_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	err = ovs_vport_cmd_validate(a);
	if (err)
		goto exit;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
exit:
	return err;
}

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		j = 0;
		hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}

static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}

static int __rehash_flow_table(void *dummy)
{
	struct datapath *dp;
	struct net *net;

	rtnl_lock();
	for_each_net(net) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		list_for_each_entry(dp, &ovs_net->dps, list_node) {
			struct flow_table *old_table = genl_dereference(dp->table);
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_rehash(old_table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(old_table);
			}
		}
	}
	rtnl_unlock();
	return 0;
}

static void rehash_flow_table(struct work_struct *work)
{
	genl_exec(__rehash_flow_table, NULL);
	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}

static int dp_destroy_all(void *data)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = data;

	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

	return 0;
}

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	genl_exec(dp_destroy_all, ovs_net);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
		VERSION);

	err = genl_exec_init();
	if (err)
		goto error;

	err = ovs_workqueues_init();
	if (err)
		goto error_genl_exec;

	err = ovs_tnl_init();
	if (err)
		goto error_wq;

	err = ovs_flow_init();
	if (err)
		goto error_tnl_exit;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_tnl_exit:
	ovs_tnl_exit();
error_wq:
	ovs_workqueues_exit();
error_genl_exec:
	genl_exec_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	cancel_delayed_work_sync(&rehash_flow_wq);
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_tnl_exit();
	ovs_workqueues_exit();
	genl_exec_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
);