/*
 * Copyright (c) 2007, 2008, 2009 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */
/* Functions for managing the dp interface/device. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/llc.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>
#include "openvswitch/datapath-protocol.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "dp_dev.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.  dp_mutex is almost completely redundant with genl_mutex
 * maintained by the Generic Netlink code, but the timeout path needs mutual
 * exclusion too.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * dp_mutex.
 */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000
static int new_nbp(struct datapath *, struct net_device *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);
struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}
static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	const struct net_device *dev = port->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
#endif

	if (dev->addr_len)
		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void dp_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net = dev_net(port->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

struct kobj_type dp_ktype = {
	.release = release_dp
};
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct net_device *dp_dev;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		err = -EFAULT;
		if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
			goto err;
		devname[IFNAMSIZ - 1] = '\0';
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	dp->dp_idx = dp_idx;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
	err = PTR_ERR(dp_dev);
	if (IS_ERR(dp_dev))
		goto err_destroy_table;

	err = new_nbp(dp, dp_dev, ODPP_LOCAL);
	if (err) {
		dp_dev_destroy(dp_dev);
		goto err_destroy_table;
	}

	dp->drop_frags = 0;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	dp_sysfs_add_dp(dp);

	return 0;

err_destroy_local_port:
	dp_del_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
	dp_table_destroy(dp->table, 0);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}
static void do_destroy_dp(struct datapath *dp)
{
	struct net_bridge_port *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_del_port(p);

	dp_sysfs_del_dp(dp);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_del_port(dp->ports[ODPP_LOCAL]);

	dp_table_destroy(dp->table, 1);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}
static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err = -ENODEV;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp) {
		do_destroy_dp(dp);
		err = 0;
	}
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p = container_of(kobj, struct net_bridge_port,
						 kobj);
	kfree(p);
}

struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp
};
/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
	struct net_bridge_port *p;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	dev_set_promiscuity(dev, 1);
	dev_hold(dev);
	p->port_no = port_no;
	p->dp = dp;
	p->dev = dev;
	if (!is_dp_dev(dev))
		rcu_assign_pointer(dev->br_port, p);
	else {
		/* It would make sense to assign dev->br_port here too, but
		 * that causes packets received on internal ports to get caught
		 * in dp_frame_hook().  In turn dp_frame_hook() can reject them
		 * back to network stack, but that's a waste of time. */
	}
	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);
	dp->n_ports++;

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	p->kobj.kset = NULL;
	kobject_init(&p->kobj, &brport_ktype);

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}
static int add_port(int dp_idx, struct odp_port __user *portp)
{
	struct net_device *dev;
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EXFULL;
	goto out_unlock_dp;

got_port_no:
	if (!(port.flags & ODP_PORT_INTERNAL)) {
		err = -ENODEV;
		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			goto out_unlock_dp;

		err = -EINVAL;
		if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER ||
		    is_dp_dev(dev))
			goto out_put;
	} else {
		dev = dp_dev_create(dp, port.devname, port_no);
		err = PTR_ERR(dev);
		if (IS_ERR(dev))
			goto out_unlock_dp;
		dev_hold(dev);
	}

	err = new_nbp(dp, dev, port_no);
	if (err)
		goto out_put;

	dp_sysfs_add_if(dp->ports[port_no]);

	err = __put_user(port_no, &port.port);

out_put:
	dev_put(dev);
out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
int dp_del_port(struct net_bridge_port *p)
{
	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	p->dp->n_ports--;

	if (is_dp_dev(p->dev)) {
		/* Make sure that no packets arrive from now on, since
		 * dp_dev_xmit() will try to find itself through
		 * p->dp->ports[], and we're about to set that to null. */
		netif_tx_disable(p->dev);
	}

	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (is_dp_dev(p->dev))
		dp_dev_destroy(p->dev);
	dev_put(p->dev);
	kobject_put(&p->kobj);

	return 0;
}
static int del_port(int dp_idx, int port_no)
{
	struct net_bridge_port *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_del_port(p);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
/* Must be called with rcu_read_lock. */
static void
do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	dp_process_received_packet(skb, p);
}
/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {
			kfree_skb(skb);
			stats->n_frags++;
			return;
		}
	}

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);
	if (flow) {
		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
				GFP_ATOMIC);
		stats->n_hit++;
	} else {
		stats->n_missed++;
		dp_output_control(dp, skb, _ODPL_MISS_NR, 0);
	}
}
/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * different set of devices!)
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	do_port_input(p, skb);
	return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Called with rcu_read_lock and bottom-halves disabled. */
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	do_port_input(p, *pskb);
	return 1;
}
#else
#error
#endif
#if defined(CONFIG_XEN) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
/* This code is copied verbatim from net/dev/core.c in Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call those functions
 * directly because they aren't exported. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {
		return 1;
	} else {
		return 0;
	}
}

int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
	if (skb->proto_csum_blank) {
		if (skb->protocol != htons(ETH_P_IP))
			goto out;
		if (!skb_pull_up_to(skb, skb->nh.iph + 1))
			goto out;
		skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
		switch (skb->nh.iph->protocol) {
		case IPPROTO_TCP:
			skb->csum = offsetof(struct tcphdr, check);
			break;
		case IPPROTO_UDP:
			skb->csum = offsetof(struct udphdr, check);
			break;
		default:
			if (net_ratelimit())
				printk(KERN_ERR "Attempting to checksum a non-"
				       "TCP/UDP packet, dropping a protocol"
				       " %d packet", skb->nh.iph->protocol);
			goto out;
		}
		if (!skb_pull_up_to(skb, skb->h.raw + skb->csum + 2))
			goto out;
		skb->ip_summed = CHECKSUM_HW;
		skb->proto_csum_blank = 0;
	}
	return 0;
out:
	return -EPROTO;
}
#else
int vswitch_skb_checksum_setup(struct sk_buff *skb) { return 0; }
#endif /* CONFIG_XEN && linux == 2.6.18 */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		  u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int port_no;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR);

	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	/* If a checksum-deferred packet is forwarded to the controller,
	 * correct the pointers and checksum.  This happens on a regular basis
	 * only on Xen (the CHECKSUM_HW case), on which VMs can pass up packets
	 * that do not have their checksum computed.  We also implement it for
	 * the non-Xen case, but it is difficult to trigger or test this case
	 * there, hence the WARN_ON_ONCE().
	 */
	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;
#ifndef CHECKSUM_HW
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		WARN_ON_ONCE(1);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
		/* Until 2.6.22, the start of the transport header was also the
		 * start of data to be checksummed.  Linux 2.6.22 introduced
		 * the csum_start field for this purpose, but we should point
		 * the transport header to it anyway for backward
		 * compatibility, as dev_queue_xmit() does even in 2.6.28. */
		skb_set_transport_header(skb, skb->csum_start -
					 skb_headroom(skb));
#endif
		err = skb_checksum_help(skb);
		if (err)
			goto err_kfree_skb;
	}
#else
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto err_kfree_skb;
	}
#endif

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);
		if (nskb) {
			kfree_skb(skb);
			skb = nskb;
			if (unlikely(IS_ERR(skb))) {
				err = PTR_ERR(skb);
				goto err;
			}
		} else {
			/* XXX This case might not be possible.  It's hard to
			 * tell from the skb_gso_segment() code and comment. */
		}
	}

	/* Figure out port number. */
	port_no = ODPP_LOCAL;
	if (skb->dev) {
		if (skb->dev->br_port)
			port_no = skb->dev->br_port->port_no;
		else if (is_dp_dev(skb->dev))
			port_no = dp_dev_priv(skb->dev)->port_no;
	}

	/* Append each packet to queue.  There will be only one packet unless
	 * we broke up a GSO packet above. */
	do {
		struct odp_msg *header;
		struct sk_buff *nskb = skb->next;
		skb->next = NULL;

		err = skb_cow(skb, sizeof *header);
		if (err) {
			while (nskb) {
				kfree_skb(skb);
				skb = nskb;
				nskb = skb->next;
			}
			goto err;
		}

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);

	wake_up_interruptible(&dp->waitqueue);
	return 0;

err_kfree_skb:
	kfree_skb(skb);
err:
	stats = percpu_ptr(dp->stats_percpu, get_cpu());
	stats->n_lost++;
	put_cpu();

	return err;
}
static int flush_flows(struct datapath *dp)
{
	dp->n_flows = 0;
	return dp_table_flush(dp);
}
static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];

		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp & ~VLAN_PCP_MASK)
				return -EINVAL;
			break;

		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EOPNOTSUPP;
			break;
		}
	}

	return 0;
}
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used.tv_sec) {
		stats->used_sec = flow->used.tv_sec;
		stats->used_nsec = flow->used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}
	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->ip_tos = flow->ip_tos;
	stats->tcp_flags = flow->tcp_flags;
}
static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;
	flow->tcp_flags = 0;
	flow->ip_tos = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
*dp
, struct odp_flow_put __user
*ufp
)
844 struct odp_flow_put uf
;
845 struct sw_flow
*flow
, **bucket
;
846 struct dp_table
*table
;
847 struct odp_flow_stats stats
;
851 if (copy_from_user(&uf
, ufp
, sizeof(struct odp_flow_put
)))
853 uf
.flow
.key
.reserved
= 0;
856 table
= rcu_dereference(dp
->table
);
857 bucket
= dp_table_lookup_for_insert(table
, &uf
.flow
.key
);
859 /* No such flow, and the slots where it could go are full. */
860 error
= uf
.flags
& ODPPF_CREATE
? -EXFULL
: -ENOENT
;
862 } else if (!*bucket
) {
863 /* No such flow, but we found an available slot for it. */
864 struct sw_flow_actions
*acts
;
867 if (!(uf
.flags
& ODPPF_CREATE
))
870 /* Expand table, if necessary, to make room. */
871 if (dp
->n_flows
* 4 >= table
->n_buckets
&&
872 table
->n_buckets
< DP_MAX_BUCKETS
) {
873 error
= dp_table_expand(dp
);
877 /* The bucket's location has changed. Try again. */
883 flow
= kmem_cache_alloc(flow_cache
, GFP_KERNEL
);
886 flow
->key
= uf
.flow
.key
;
887 spin_lock_init(&flow
->lock
);
890 /* Obtain actions. */
891 acts
= get_actions(&uf
.flow
);
892 error
= PTR_ERR(acts
);
894 goto error_free_flow
;
895 rcu_assign_pointer(flow
->sf_acts
, acts
);
897 /* Put flow in bucket. */
898 rcu_assign_pointer(*bucket
, flow
);
900 memset(&stats
, 0, sizeof(struct odp_flow_stats
));
902 /* We found a matching flow. */
903 struct sw_flow
*flow
= *rcu_dereference(bucket
);
904 struct sw_flow_actions
*old_acts
, *new_acts
;
905 unsigned long int flags
;
907 /* Bail out if we're not allowed to modify an existing flow. */
909 if (!(uf
.flags
& ODPPF_MODIFY
))
913 new_acts
= get_actions(&uf
.flow
);
914 error
= PTR_ERR(new_acts
);
915 if (IS_ERR(new_acts
))
917 old_acts
= rcu_dereference(flow
->sf_acts
);
918 if (old_acts
->n_actions
!= new_acts
->n_actions
||
919 memcmp(old_acts
->actions
, new_acts
->actions
,
920 sizeof(union odp_action
) * old_acts
->n_actions
)) {
921 rcu_assign_pointer(flow
->sf_acts
, new_acts
);
922 flow_deferred_free_acts(old_acts
);
927 /* Fetch stats, then clear them if necessary. */
928 spin_lock_irqsave(&flow
->lock
, flags
);
929 get_stats(flow
, &stats
);
930 if (uf
.flags
& ODPPF_ZERO_STATS
)
932 spin_unlock_irqrestore(&flow
->lock
, flags
);
935 /* Copy stats to userspace. */
936 if (__copy_to_user(&ufp
->flow
.stats
, &stats
,
937 sizeof(struct odp_flow_stats
)))
942 kmem_cache_free(flow_cache
, flow
);
947 static int put_actions(const struct sw_flow
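/* put_flow() thus implements create-or-modify semantics: ODPPF_CREATE permits
 * inserting a new flow, ODPPF_MODIFY permits replacing an existing flow's
 * actions, and ODPPF_ZERO_STATS samples and resets the counters atomically
 * under flow->lock. */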
*flow
, struct odp_flow __user
*ufp
)
949 union odp_action __user
*actions
;
950 struct sw_flow_actions
*sf_acts
;
953 if (__get_user(actions
, &ufp
->actions
) ||
954 __get_user(n_actions
, &ufp
->n_actions
))
960 sf_acts
= rcu_dereference(flow
->sf_acts
);
961 if (__put_user(sf_acts
->n_actions
, &ufp
->n_actions
) ||
962 (actions
&& copy_to_user(actions
, sf_acts
->actions
,
963 sizeof(union odp_action
) *
964 min(sf_acts
->n_actions
, n_actions
))))
static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return put_actions(flow, ufp);
}
static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof uf))
		goto error;
	uf.key.reserved = 0;

	flow = dp_table_lookup(table, &uf.key);
	error = -ENOENT;
	if (!flow)
		goto error;

	/* XXX redundant lookup */
	error = dp_table_delete(table, flow);
	if (error)
		goto error;

	/* XXX These statistics might lose a few packets, since other CPUs can
	 * be using this flow.  We used to synchronize_rcu() to make sure that
	 * we get completely accurate stats, but that blows our performance,
	 * badly. */
	dp->n_flows--;
	error = answer_query(flow, ufp);
	flow_deferred_free(flow);

error:
	return error;
}
static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	int i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		uf.key.reserved = 0;

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(flow, ufp);
		if (error)
			return -EFAULT;
	}
	return flowvec->n_flows;
}
struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	int n_flows;
	int listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}
static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}
static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	int err;

	err = -EFAULT;
	if (copy_from_user(&execute, executep, sizeof execute))
		goto error;

	err = -EINVAL;
	if (execute.length < ETH_HLEN || execute.length > 65535)
		goto error;

	err = -ENOMEM;
	actions = flow_actions_alloc(execute.n_actions);
	if (!actions)
		goto error;

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;
	if (execute.in_port < DP_MAX_PORTS) {
		struct net_bridge_port *p = dp->ports[execute.in_port];
		if (p)
			skb->dev = p->dev;
	}

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets * 2;
	stats.max_capacity = DP_MAX_BUCKETS * 2;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct net_bridge_port *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *dev = p->dev;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_dp_dev(dev))
			continue;

		if (!mtu || dev->mtu < mtu)
			mtu = dev->mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
	struct odp_port op;
	memset(&op, 0, sizeof op);
	strncpy(op.devname, p->dev->name, sizeof op.devname);
	op.port = p->port_no;
	op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0;
	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
static int
query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;
	if (port.devname[0]) {
		struct net_bridge_port *p;
		struct net_device *dev;
		int err;

		port.devname[IFNAMSIZ - 1] = '\0';

		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			return -ENODEV;

		p = dev->br_port;
		if (!p && is_dp_dev(dev)) {
			struct dp_dev *dp_dev = dp_dev_priv(dev);
			if (dp_dev->dp == dp)
				p = dp->ports[dp_dev->port_no];
		}
		err = p && p->dp == dp ? put_port(p, uport) : -ENOENT;
		dev_put(dev);

		return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
		return put_port(dp->ports[port.port], uport);
	}
}
static int
list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct net_bridge_port *p;
	int idx;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	idx = 0;
	if (pv.n_ports) {
		list_for_each_entry_rcu (p, &dp->port_list, node) {
			if (put_port(p, &pv.ports[idx]))
				return -EFAULT;
			if (idx++ >= pv.n_ports)
				break;
		}
	}
	return put_user(dp->n_ports, &pvp->n_ports);
}
/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}
static int
set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;
	int error;

	error = -EFAULT;
	if (copy_from_user(&pg, upg, sizeof pg))
		goto error;

	error = -EINVAL;
	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		goto error;

	error = -ENOMEM;
	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);
	if (new_group == NULL)
		goto error;

	new_group->n_ports = pg.n_ports;
	error = -EFAULT;
	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))
		goto error_free;

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);
	if (old_group)
		call_rcu(&old_group->rcu, free_port_group);
	return 0;

error_free:
	kfree(new_group);
error:
	return error;
}
static int
get_port_group(struct datapath *dp, struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	u16 n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_PORT_ADD:
		err = add_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_PORT_DEL:
		err = get_user(port_no, (int __user *)argp);
		if (err)
			goto exit;
		err = del_port(dp_idx, port_no);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user((int)f->private_data, (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		f->private_data = (void*)listeners;
		break;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_PORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, query_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, list_flows);
		break;

	case ODP_EXECUTE:
		err = do_execute(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
openvswitch_read(struct file
*f
, char __user
*buf
, size_t nbytes
,
1486 /* XXX is there sufficient synchronization here? */
1487 int listeners
= (int) f
->private_data
;
1488 int dp_idx
= iminor(f
->f_dentry
->d_inode
);
1489 struct datapath
*dp
= get_dp(dp_idx
);
1490 struct sk_buff
*skb
;
1491 struct iovec __user iov
;
1498 if (nbytes
== 0 || !listeners
)
1504 for (i
= 0; i
< DP_N_QUEUES
; i
++) {
1505 if (listeners
& (1 << i
)) {
1506 skb
= skb_dequeue(&dp
->queues
[i
]);
1512 if (f
->f_flags
& O_NONBLOCK
) {
1517 wait_event_interruptible(dp
->waitqueue
,
1518 dp_has_packet_of_interest(dp
,
1521 if (signal_pending(current
)) {
1522 retval
= -ERESTARTSYS
;
1527 copy_bytes
= min(skb
->len
, nbytes
);
1529 iov
.iov_len
= copy_bytes
;
1530 retval
= skb_copy_datagram_iovec(skb
, 0, &iov
, iov
.iov_len
);
1532 retval
= copy_bytes
;
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	/* XXX is there sufficient synchronization here? */
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, (int)file->private_data))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}
struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};
static int major;
static struct llc_sap *dp_stp_sap;
static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* We don't really care about STP packets, we just listen for them for
	 * mutual exclusion with the bridge module, so this just discards
	 * them. */
	kfree_skb(skb);
	return 0;
}
static int __init dp_init(void)
{
	int err;

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	/* Register to receive STP packets because the bridge module also
	 * attempts to do so.  Since there can only be a single listener for a
	 * given protocol, this provides mutual exclusion against the bridge
	 * module, preventing both of them from being loaded at the same
	 * time. */
	dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv);
	if (!dp_stp_sap) {
		printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
		return -EADDRINUSE;
	}

	err = flow_init();
	if (err)
		goto error;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_flow_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_flow_exit:
	flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	flow_exit();
	br_handle_frame_hook = NULL;
	llc_sap_put(dp_stp_sap);
}
module_init(dp_init);
module_exit(dp_cleanup);
MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");