/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/genetlink.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
int ovs_net_id __read_mostly;
static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
		       struct genl_multicast_group *grp)
{
	genl_notify(skb, genl_info_net(info), info->snd_portid,
		    grp->id, info->nlhdr, GFP_KERNEL);
}
/* Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath, port, set
 * operations on vports, etc.) and writes to other state (flow table
 * modifications, set miscellaneous datapath parameters, etc.), are protected
 * by ovs_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);
void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	return lockdep_is_held(&ovs_mutex);
}
#endif
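
/*
 * Illustrative usage of the locking scheme above (a sketch, not part of the
 * original file): writers take ovs_mutex around any datapath, vport or
 * flow-table change, while readers rely on RCU:
 *
 *	ovs_lock();
 *	... add/remove datapaths, vports or flow-table entries ...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	... look up datapaths, vports or flows ...
 *	rcu_read_unlock();
 */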
static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
				  struct sk_buff *,
				  const struct dp_upcall_info *);
/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp = NULL;
	struct net_device *dev;

	dev = dev_get_by_index_rcu(net, dp_ifindex);
	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}
static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}
/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}
void ovs_dp_detach_port(struct vport *p)
{
	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	struct sw_flow_key key;
	u64 *stats_counter;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_extract(skb, p->port_no, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup(&dp->table, &key);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.portid = p->upcall_portid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	OVS_CB(skb)->flow = flow;
	OVS_CB(skb)->pkt_key = &key;

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	u64_stats_update_end(&stats->sync);
}
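
/*
 * Summary (not part of the original file): the receive fast path above
 * extracts a flow key from the packet, looks it up in the datapath's flow
 * table, and either queues an OVS_PACKET_CMD_MISS upcall to userspace via
 * ovs_dp_upcall() or runs the matched flow's actions with
 * ovs_execute_actions(), bumping n_missed or n_hit accordingly.
 */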
static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
};
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int dp_ifindex;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	dp_ifindex = get_dpifindex(dp);

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	else
		err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->sync);
	stats->n_lost++;
	u64_stats_update_end(&stats->sync);

	return err;
}
static int queue_gso_packets(struct net *net, int dp_ifindex,
			     struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
static size_t key_attr_size(void)
{
	return nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
		  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
		  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
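
/*
 * Sizing note (not in the original source): nla_total_size(n) is
 * NLA_HDRLEN + n rounded up to a 4-byte boundary, e.g. nla_total_size(0)
 * == 4, nla_total_size(4) == 8 and nla_total_size(40) == 44, so the sum
 * above is a worst-case bound on the netlink-encoded flow key that is used
 * when sizing upcall and flow reply messages below.
 */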
static size_t upcall_msg_size(const struct sk_buff *skb,
			      const struct nlattr *userdata)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (userdata)
		size += NLA_ALIGN(userdata->nla_len);

	return size;
}
static int queue_userspace_packet(struct net *net, int dp_ifindex,
				  struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
		vlan_set_tci(nskb, 0);
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	genlmsg_end(user_skb, upcall);

	err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
	kfree_skb(nskb);
	return err;
}
static void clear_stats(struct sw_flow *flow)
{
	flow->packet_count = 0;
	flow->byte_count = 0;
}
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct datapath *dp;
	struct ethhdr *eth;
	int len;
	int err;

	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		return -EINVAL;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have.
	 */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();

	err = ovs_flow_extract(packet, -1, &flow->key);

	err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);

	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, 0, &acts);
	rcu_assign_pointer(flow->sf_acts, acts);

	OVS_CB(packet)->flow = flow;
	OVS_CB(packet)->pkt_key = &flow->key;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);

	err = ovs_execute_actions(dp, packet);

	ovs_flow_free(flow, false);
	return err;

err_flow_free:
	ovs_flow_free(flow, false);
	return err;
}
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};
static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
	int i;

	stats->n_flows = ovs_flow_tbl_count(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
	}
}
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};
static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
};
static struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP
};
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
	return NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
		+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}
/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct nlattr *start;
	struct ovs_flow_stats stats;
	struct ovs_header *ovs_header;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	/* Fill flow key. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
	if (!nla)
		goto nla_put_failure;

	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
	if (err)
		goto error;
	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = flow->tcp_flags;
	spin_unlock_bh(&flow->lock);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		goto nla_put_failure;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS,
		    sizeof(struct ovs_flow_stats), &stats))
		goto nla_put_failure;

	if (tcp_flags &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
		goto nla_put_failure;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_check(flow->sf_acts,
						lockdep_ovsl_is_held());

		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);
		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				goto error;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len)
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
	const struct sw_flow_actions *sf_acts;

	sf_acts = ovsl_dereference(flow->sf_acts);

	return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
}
static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
					       struct datapath *dp,
					       u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(flow);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
	return skb;
}
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key, masked_key;
	struct sw_flow *flow = NULL;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts = NULL;
	struct sw_flow_match match;
	int error;

	/* Extract key. */
	if (!a[OVS_FLOW_ATTR_KEY])
		return -EINVAL;

	ovs_match_init(&match, &key, &mask);
	error = ovs_nla_get_match(&match,
				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
		error = PTR_ERR(acts);

		ovs_flow_mask_key(&masked_key, &key, &mask);
		error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
					     &masked_key, 0, &acts);
		if (error)
			OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
	} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
		return -EINVAL;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);

	/* Check if this is a duplicate flow */
	flow = ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow) {
		/* Bail out if we're not allowed to create a new flow. */
		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
			goto err_unlock_ovs;

		flow = ovs_flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto err_unlock_ovs;
		}

		flow->key = masked_key;
		flow->unmasked_key = key;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto err_unlock_ovs;

		/* The unmasked key has to be the same for flow updates. */
		if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
			OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
			goto err_unlock_ovs;
		}

		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);
		ovs_nla_free_flow_actions(old_acts);

		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
						info->snd_seq, OVS_FLOW_CMD_NEW);

		/* Clear stats. */
		if (a[OVS_FLOW_ATTR_CLEAR]) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}
	ovs_unlock();

	if (!IS_ERR(reply))
		ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
	else
		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
				ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
	return 0;

err_flow_free:
	ovs_flow_free(flow, false);
err_unlock_ovs:
	ovs_unlock();
	return error;
}
*skb
, struct genl_info
*info
)
885 struct nlattr
**a
= info
->attrs
;
886 struct ovs_header
*ovs_header
= info
->userhdr
;
887 struct sw_flow_key key
;
888 struct sk_buff
*reply
;
889 struct sw_flow
*flow
;
891 struct sw_flow_match match
;
894 if (!a
[OVS_FLOW_ATTR_KEY
]) {
895 OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
899 ovs_match_init(&match
, &key
, NULL
);
900 err
= ovs_nla_get_match(&match
, a
[OVS_FLOW_ATTR_KEY
], NULL
);
905 dp
= get_dp(sock_net(skb
->sk
), ovs_header
->dp_ifindex
);
911 flow
= ovs_flow_tbl_lookup(&dp
->table
, &key
);
912 if (!flow
|| !ovs_flow_cmp_unmasked_key(flow
, &match
)) {
917 reply
= ovs_flow_cmd_build_info(flow
, dp
, info
->snd_portid
,
918 info
->snd_seq
, OVS_FLOW_CMD_NEW
);
920 err
= PTR_ERR(reply
);
925 return genlmsg_reply(reply
, info
);
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	int err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (!a[OVS_FLOW_ATTR_KEY]) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	ovs_match_init(&match, &key, NULL);
	err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
	if (err)
		goto unlock;

	flow = ovs_flow_tbl_lookup(&dp->table, &key);
	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_alloc_info(flow);
	if (!reply) {
		err = -ENOMEM;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);

	err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
				     info->snd_seq, 0, OVS_FLOW_CMD_DEL);

	ovs_flow_free(flow, true);
	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
	return 0;
unlock:
	ovs_unlock();
	return err;
}
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, dp, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}
static struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new_or_set,
	},
};
static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};
static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
};
static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP
};
static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));

	return msgsize;
}
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
		goto nla_put_failure;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}
static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
					     u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
	if (retval < 0) {
		kfree_skb(skb);
		return ERR_PTR(retval);
	}
	return skb;
}
/* Called with ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	ovs_lock();

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock_ovs;

	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

	/* Allocate table. */
	err = ovs_flow_tbl_init(&dp->table);
	if (err)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto err_destroy_ports_array;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
err_destroy_ports_array:
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
	release_net(ovs_dp_get_net(dp));
	kfree(dp);
err_unlock_ovs:
	ovs_unlock();
err:
	return err;
}
/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is datapath internal port. We need to make sure that
	 * all ports in the datapath are destroyed first before freeing it.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	call_rcu(&dp->rcu, destroy_dp_rcu);
}
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto unlock;

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
	return 0;
unlock:
	ovs_unlock();
	return err;
}
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
				ovs_dp_datapath_multicast_group.id, err);
		goto unlock;
	}

	ovs_unlock();
	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
	return 0;
unlock:
	ovs_unlock();
	return err;
}
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);

unlock:
	ovs_unlock();
	return err;
}
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}

	cb->args[0] = i;
	return skb->len;
}
static struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
static struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
};
struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};
/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
/* Called with ovs_mutex or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	return skb;
}
/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	if (a[OVS_VPORT_ATTR_STATS])
		ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}

	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

exit_unlock:
	ovs_unlock();
exit:
	return err;
}
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_free;
	}

	if (a[OVS_VPORT_ATTR_STATS])
		ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));

	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);

	ovs_unlock();
	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
	return 0;

exit_free:
	kfree_skb(reply);
exit_unlock:
	ovs_unlock();
	return err;
}
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	ovs_dp_detach_port(vport);

	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

exit_unlock:
	ovs_unlock();
	return err;
}
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;
	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
	return err;
}
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);

	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;
			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();
	cb->args[0] = i;
	cb->args[1] = j;
	return skb->len;
}
static struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};
struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};
static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};
static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}
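
/*
 * Illustrative note (not in the original source): dp_register_genl() below
 * walks dp_genl_families[] in order, registering each Generic Netlink
 * family and, where present, its multicast group; on failure it calls
 * dp_unregister_genl(n_registered) so only the families registered so far
 * are torn down, while dp_cleanup() later unregisters all of them with
 * dp_unregister_genl(ARRAY_SIZE(dp_genl_families)).
 */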
static int dp_register_genl(void)
{
	int n_registered = 0;
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}
static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	return 0;
}
static void __net_exit ovs_exit_net(struct net *net)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);
	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}
static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.size = sizeof(struct ovs_net),
};

DEFINE_COMPAT_PNET_REG_FUNC(device);
static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
		VERSION);

	err = ovs_flow_init();
	if (err)
		goto error;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error:
	return err;
}
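
/*
 * Illustrative note (not in the original source): dp_init() brings the
 * module up in order (flow subsystem, vport subsystem, pernet device,
 * netdevice notifier, then the Generic Netlink families), with each error
 * label unwinding only the steps that already succeeded; dp_cleanup()
 * below unregisters in the reverse order.
 */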
static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
}
module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);