/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009 Nicira Networks.
 */

/* Functions for managing the dp interface/device. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/llc.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/llc.h>

#include "openvswitch/datapath-protocol.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "dp_dev.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

int (*dp_add_dp_hook)(struct datapath *dp);
EXPORT_SYMBOL(dp_add_dp_hook);

int (*dp_del_dp_hook)(struct datapath *dp);
EXPORT_SYMBOL(dp_del_dp_hook);

int (*dp_add_if_hook)(struct net_bridge_port *p);
EXPORT_SYMBOL(dp_add_if_hook);

int (*dp_del_if_hook)(struct net_bridge_port *p);
EXPORT_SYMBOL(dp_del_if_hook);
/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.  dp_mutex is almost completely redundant with genl_mutex
 * maintained by the Generic Netlink code, but the timeout path needs mutual
 * exclusion too.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * dp_mutex.
 */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

static int new_nbp(struct datapath *, struct net_device *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);
struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}
static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	const struct net_device *dev = port->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void dp_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net = dev_net(port->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct net_device *dp_dev;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		err = -EFAULT;
		if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
			goto err;
		devname[IFNAMSIZ - 1] = '\0';
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Setup our datapath device */
	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
	err = PTR_ERR(dp_dev);
	if (IS_ERR(dp_dev))
		goto err_free_dp;

	err = -ENOMEM;
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
	if (!dp->table)
		goto err_destroy_dp_dev;
	INIT_LIST_HEAD(&dp->port_list);

	err = new_nbp(dp, dp_dev, ODPP_LOCAL);
	if (err)
		goto err_destroy_table;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	if (dp_add_dp_hook)
		dp_add_dp_hook(dp);

	return 0;

err_destroy_local_port:
	dp_del_port(dp->ports[ODPP_LOCAL], NULL);
err_destroy_table:
	dp_table_destroy(dp->table, 0);
err_destroy_dp_dev:
	dp_dev_destroy(dp_dev);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}
static void do_destroy_dp(struct datapath *dp, struct list_head *dp_devs)
{
	struct net_bridge_port *p, *n;
	int i;

	if (dp_del_dp_hook)
		dp_del_dp_hook(dp);

	/* Drop references to DP. */
	list_for_each_entry_safe (p, n, &dp->port_list, node)
		dp_del_port(p, dp_devs);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	/* Wait until no longer in use, then destroy it. */
	synchronize_rcu();
	dp_table_destroy(dp->table, 1);
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kfree(dp);
	module_put(THIS_MODULE);
}
static int destroy_dp(int dp_idx)
{
	struct dp_dev *dp_dev, *next;
	struct datapath *dp;
	LIST_HEAD(dp_devs);
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp, &dp_devs);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
		free_netdev(dp_dev->dev);
	return err;
}
/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
	struct net_bridge_port *p;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	dev_set_promiscuity(dev, 1);
	dev_hold(dev);
	p->port_no = port_no;
	p->dp = dp;
	p->dev = dev;
	if (!is_dp_dev(dev))
		rcu_assign_pointer(dev->br_port, p);
	else {
		/* It would make sense to assign dev->br_port here too, but
		 * that causes packets received on internal ports to get caught
		 * in dp_frame_hook().  In turn dp_frame_hook() can reject them
		 * back to network stack, but that's a waste of time. */
	}
	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}
static int add_port(int dp_idx, struct odp_port __user *portp)
{
	struct net_device *dev;
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';
	port_no = port.port;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	err = -EEXIST;
	if (dp->ports[port_no])
		goto out_unlock_dp;

	if (!(port.flags & ODP_PORT_INTERNAL)) {
		err = -ENODEV;
		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			goto out_unlock_dp;

		err = -EINVAL;
		if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER ||
		    is_dp_dev(dev))
			goto out_put;
	} else {
		dev = dp_dev_create(dp, port.devname, port_no);
		err = PTR_ERR(dev);
		if (IS_ERR(dev))
			goto out_unlock_dp;
		dev_hold(dev);
	}

	err = new_nbp(dp, dev, port_no);
	if (err)
		goto out_put;

	if (dp_add_if_hook)
		dp_add_if_hook(dp->ports[port_no]);

out_put:
	dev_put(dev);
out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
int dp_del_port(struct net_bridge_port *p, struct list_head *dp_devs)
{
	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL && dp_del_if_hook)
		sysfs_remove_link(&p->dp->ifobj, p->dev->name);

	dp_ifinfo_notify(RTM_DELLINK, p);

	if (is_dp_dev(p->dev)) {
		/* Make sure that no packets arrive from now on, since
		 * dp_dev_xmit() will try to find itself through
		 * p->dp->ports[], and we're about to set that to null. */
		netif_tx_disable(p->dev);
	}

	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (is_dp_dev(p->dev)) {
		dp_dev_destroy(p->dev);
		if (dp_devs) {
			struct dp_dev *dp_dev = dp_dev_priv(p->dev);
			list_add(&dp_dev->list, dp_devs);
		}
	}
	if (p->port_no != ODPP_LOCAL && dp_del_if_hook) {
		dp_del_if_hook(p);
	} else {
		dev_put(p->dev);
		kfree(p);
	}

	return 0;
}
static int del_port(int dp_idx, int port_no)
{
	struct dp_dev *dp_dev, *next;
	struct net_bridge_port *p;
	struct datapath *dp;
	LIST_HEAD(dp_devs);
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_del_port(p, &dp_devs);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
		free_netdev(dp_dev->dev);
	return err;
}
/* Must be called with rcu_read_lock. */
static void
do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	dp_process_received_packet(skb, p);
}
/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));
	WARN_ON_ONCE(skb->destructor);

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {
			kfree_skb(skb);
			stats->n_frags++;
			return;
		}
	}

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);
	if (flow) {
		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
				GFP_ATOMIC);
		stats->n_hit++;
	} else {
		stats->n_missed++;
		dp_output_control(dp, skb, _ODPL_MISS_NR, 0);
	}
}
/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * different set of devices!)
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	do_port_input(p, skb);
	return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Called with rcu_read_lock and bottom-halves disabled. */
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	do_port_input(p, *pskb);
	return 1;
}
#else
#error
#endif
#ifdef CONFIG_XEN
#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
/* This code is copied verbatim from net/dev/core.c in Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call those functions
 * directly because they aren't exported. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {
		return 1;
	} else {
		return 0;
	}
}

int skb_checksum_setup(struct sk_buff *skb)
{
	if (skb->proto_csum_blank) {
		if (skb->protocol != htons(ETH_P_IP))
			goto out;
		if (!skb_pull_up_to(skb, skb->nh.iph + 1))
			goto out;
		skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
		switch (skb->nh.iph->protocol) {
		case IPPROTO_TCP:
			skb->csum = offsetof(struct tcphdr, check);
			break;
		case IPPROTO_UDP:
			skb->csum = offsetof(struct udphdr, check);
			break;
		default:
			if (net_ratelimit())
				printk(KERN_ERR "Attempting to checksum a non-"
				       "TCP/UDP packet, dropping a protocol"
				       " %d packet", skb->nh.iph->protocol);
			goto out;
		}
		if (!skb_pull_up_to(skb, skb->h.raw + skb->csum + 2))
			goto out;
		skb->ip_summed = CHECKSUM_HW;
		skb->proto_csum_blank = 0;
	}
	return 0;
out:
	return -EPROTO;
}
#endif /* linux == 2.6.18 */
#endif /* CONFIG_XEN */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		  u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int port_no;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR);

	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	/* If a checksum-deferred packet is forwarded to the controller,
	 * correct the pointers and checksum.  This happens on a regular basis
	 * only on Xen (the CHECKSUM_HW case), on which VMs can pass up packets
	 * that do not have their checksum computed.  We also implement it for
	 * the non-Xen case, but it is difficult to trigger or test this case
	 * there, hence the WARN_ON_ONCE().
	 */
	err = skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;
#ifndef CHECKSUM_HW
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		WARN_ON_ONCE(1);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
		/* Until 2.6.22, the start of the transport header was also the
		 * start of data to be checksummed.  Linux 2.6.22 introduced
		 * the csum_start field for this purpose, but we should point
		 * the transport header to it anyway for backward
		 * compatibility, as dev_queue_xmit() does even in 2.6.28. */
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
#endif
		err = skb_checksum_help(skb);
		if (err)
			goto err_kfree_skb;
	}
#else
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto err_kfree_skb;
	}
#endif

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);

		kfree_skb(skb);
		skb = nskb;
		if (unlikely(IS_ERR(skb))) {
			err = PTR_ERR(skb);
			goto err;
		}

		/* XXX This case might not be possible.  It's hard to
		 * tell from the skb_gso_segment() code and comment. */
		if (unlikely(!skb)) {
			err = -ENOMEM;
			goto err;
		}
	}

	/* Figure out port number. */
	port_no = ODPP_LOCAL;
	if (skb->dev) {
		if (skb->dev->br_port)
			port_no = skb->dev->br_port->port_no;
		else if (is_dp_dev(skb->dev))
			port_no = dp_dev_priv(skb->dev)->port_no;
	}

	/* Append each packet to queue.  There will be only one packet unless
	 * we broke up a GSO packet above. */
	do {
		struct odp_msg *header;
		struct sk_buff *nskb = skb->next;
		skb->next = NULL;

		err = skb_cow(skb, sizeof *header);
		if (err) {
			while (nskb) {
				kfree_skb(skb);
				skb = nskb;
				nskb = skb->next;
			}
			goto err_kfree_skb;
		}

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);

	wake_up_interruptible(&dp->waitqueue);
	return 0;

err_kfree_skb:
	kfree_skb(skb);
err:
	stats = percpu_ptr(dp->stats_percpu, get_cpu());
	stats->n_lost++;
	put_cpu();

	return err;
}
static int flush_flows(struct datapath *dp)
{
	dp->n_flows = 0;
	return dp_table_flush(dp);
}
static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];
		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp & ~VLAN_PCP_MASK)
				return -EINVAL;
			break;

		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EOPNOTSUPP;
			break;
		}
	}

	return 0;
}
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used.tv_sec) {
		stats->used_sec = flow->used.tv_sec;
		stats->used_nsec = flow->used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}
	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->ip_tos = flow->ip_tos;
	stats->tcp_flags = flow->tcp_flags;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;
	flow->tcp_flags = 0;
	flow->ip_tos = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_put uf;
	struct sw_flow *flow, **bucket;
	struct dp_table *table;
	struct odp_flow_stats stats;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		goto error;
	uf.flow.key.reserved = 0;

retry:
	table = rcu_dereference(dp->table);
	bucket = dp_table_lookup_for_insert(table, &uf.flow.key);
	if (!bucket) {
		/* No such flow, and the slots where it could go are full. */
		error = uf.flags & ODPPF_CREATE ? -EXFULL : -ENOENT;
		goto error;
	} else if (!*bucket) {
		/* No such flow, but we found an available slot for it. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf.flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (dp->n_flows * 4 >= table->n_buckets &&
		    table->n_buckets < DP_MAX_BUCKETS) {
			error = dp_table_expand(dp);
			if (error)
				goto error;

			/* The bucket's location has changed.  Try again. */
			goto retry;
		}

		/* Allocate flow. */
		error = -ENOMEM;
		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
		if (flow == NULL)
			goto error;
		flow->key = uf.flow.key;
		spin_lock_init(&flow->lock);
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf.flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		rcu_assign_pointer(*bucket, flow);
		dp->n_flows++;
		memset(&stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow *flow = *rcu_dereference(bucket);
		struct sw_flow_actions *old_acts, *new_acts;
		unsigned long int flags;

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf.flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf.flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;
		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_irqsave(&flow->lock, flags);
		get_stats(flow, &stats);
		if (uf.flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_irqrestore(&flow->lock, flags);
	}

	/* Copy stats to userspace. */
	if (__copy_to_user(&ufp->flow.stats, &stats,
			   sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return 0;

error_free_flow:
	kmem_cache_free(flow_cache, flow);
error:
	return error;
}
static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
{
	union odp_action __user *actions;
	struct sw_flow_actions *sf_acts;
	u32 n_actions;

	if (__get_user(actions, &ufp->actions) ||
	    __get_user(n_actions, &ufp->n_actions))
		return -EFAULT;

	if (!n_actions)
		return 0;
	if (ufp->n_actions > INT_MAX / sizeof(union odp_action))
		return -EINVAL;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}
static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return put_actions(flow, ufp);
}
static int del_or_query_flow(struct datapath *dp,
			     struct odp_flow __user *ufp,
			     unsigned int cmd)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof uf))
		goto error;
	uf.key.reserved = 0;

	flow = dp_table_lookup(table, &uf.key);
	error = -ENOENT;
	if (!flow)
		goto error;

	if (cmd == ODP_FLOW_DEL) {
		/* XXX redundant lookup */
		error = dp_table_delete(table, flow);
		if (error)
			goto error;

		/* XXX These statistics might lose a few packets, since other
		 * CPUs can be using this flow.  We used to synchronize_rcu()
		 * to make sure that we get completely accurate stats, but that
		 * blows our performance, badly. */
		dp->n_flows--;
		error = answer_query(flow, ufp);
		flow_deferred_free(flow);
	} else {
		error = answer_query(flow, ufp);
	}

error:
	return error;
}
static int query_multiple_flows(struct datapath *dp,
				const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	int i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct __user odp_flow *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		uf.key.reserved = 0;

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __clear_user(&ufp->stats, sizeof ufp->stats);
		else
			error = answer_query(flow, ufp);
		if (error)
			return -EFAULT;
	}
	return flowvec->n_flows;
}
struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	int n_flows;
	int listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}
static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	/* A negative return is an error; a return equal to n_flows means the
	 * whole vector was processed; anything in between is a partial result
	 * that must be reported back through uflowvec->n_flows. */
	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}
static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	int err;

	err = -EFAULT;
	if (copy_from_user(&execute, executep, sizeof execute))
		goto error;

	err = -EINVAL;
	if (execute.length < ETH_HLEN || execute.length > 65535)
		goto error;

	err = -ENOMEM;
	actions = flow_actions_alloc(execute.n_actions);
	if (!actions)
		goto error;

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;
	if (execute.in_port < DP_MAX_PORTS) {
		struct net_bridge_port *p = dp->ports[execute.in_port];
		if (p)
			skb->dev = p->dev;
	}

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}
static int
get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets * 2;
	stats.max_capacity = DP_MAX_BUCKETS * 2;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
	struct odp_port op;
	memset(&op, 0, sizeof op);
	strncpy(op.devname, p->dev->name, sizeof op.devname);
	op.port = p->port_no;
	op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0;
	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
static int
query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;
	if (port.devname[0]) {
		struct net_bridge_port *p;
		struct net_device *dev;
		int err;

		port.devname[IFNAMSIZ - 1] = '\0';

		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			return -ENODEV;

		p = dev->br_port;
		if (!p && is_dp_dev(dev)) {
			struct dp_dev *dp_dev = dp_dev_priv(dev);
			if (dp_dev->dp == dp)
				p = dp->ports[dp_dev->port_no];
		}
		err = p && p->dp == dp ? put_port(p, uport) : -ENOENT;
		dev_put(dev);

		return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
		return put_port(dp->ports[port.port], uport);
	}
}
static int
list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct net_bridge_port *p;
	int idx;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	idx = 0;
	if (pv.n_ports) {
		list_for_each_entry_rcu (p, &dp->port_list, node) {
			if (put_port(p, &pv.ports[idx]))
				return -EFAULT;
			if (idx++ >= pv.n_ports)
				break;
		}
	}
	return put_user(idx, &pvp->n_ports);
}
/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}
static int
set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;
	int error;

	error = -EFAULT;
	if (copy_from_user(&pg, upg, sizeof pg))
		goto error;

	error = -EINVAL;
	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		goto error;

	error = -ENOMEM;
	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);
	if (!new_group)
		goto error;

	new_group->n_ports = pg.n_ports;
	error = -EFAULT;
	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))
		goto error_free;

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);
	if (old_group)
		call_rcu(&old_group->rcu, free_port_group);
	return 0;

error_free:
	kfree(new_group);
error:
	return error;
}
static int
get_port_group(struct datapath *dp, struct odp_port_group *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	int n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		return create_dp(dp_idx, (char __user *)argp);

	case ODP_DP_DESTROY:
		return destroy_dp(dp_idx);

	case ODP_PORT_ADD:
		return add_port(dp_idx, (struct odp_port __user *)argp);

	case ODP_PORT_DEL:
		err = get_user(port_no, (int __user *)argp);
		if (err)
			return err;
		return del_port(dp_idx, port_no);
	}

	dp = get_dp_locked(dp_idx);
	if (!dp)
		return -ENODEV;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user((int)f->private_data, (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		f->private_data = (void*)listeners;
		break;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_PORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
	case ODP_FLOW_GET:
		err = del_or_query_flow(dp, (struct odp_flow __user *)argp,
					cmd);
		break;

	case ODP_FLOW_GET_MULTIPLE:
		err = do_flowvec_ioctl(dp, argp, query_multiple_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, list_flows);
		break;

	case ODP_EXECUTE:
		err = do_execute(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
	return err;
}
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
openvswitch_read(struct file
*f
, char __user
*buf
, size_t nbytes
,
1466 int listeners
= (int) f
->private_data
;
1467 int dp_idx
= iminor(f
->f_dentry
->d_inode
);
1468 struct datapath
*dp
= get_dp(dp_idx
);
1469 struct sk_buff
*skb
;
1470 struct iovec __user iov
;
1477 if (nbytes
== 0 || !listeners
)
1483 for (i
= 0; i
< DP_N_QUEUES
; i
++) {
1484 if (listeners
& (1 << i
)) {
1485 skb
= skb_dequeue(&dp
->queues
[i
]);
1491 if (f
->f_flags
& O_NONBLOCK
) {
1496 wait_event_interruptible(dp
->waitqueue
,
1497 dp_has_packet_of_interest(dp
,
1500 if (signal_pending(current
)) {
1501 retval
= -ERESTARTSYS
;
1506 copy_bytes
= min(skb
->len
, nbytes
);
1508 iov
.iov_len
= copy_bytes
;
1509 retval
= skb_copy_datagram_iovec(skb
, 0, &iov
, iov
.iov_len
);
1511 retval
= copy_bytes
;
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, (int)file->private_data))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}
struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read		= openvswitch_read,
	.poll		= openvswitch_poll,
	.unlocked_ioctl	= openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};
static int major;
static struct llc_sap *dp_stp_sap;

static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* We don't really care about STP packets, we just listen for them for
	 * mutual exclusion with the bridge module, so this just discards
	 * them. */
	kfree_skb(skb);
	return 0;
}
static int __init dp_init(void)
{
	int err;

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	/* Register to receive STP packets because the bridge module also
	 * attempts to do so.  Since there can only be a single listener for a
	 * given protocol, this provides mutual exclusion against the bridge
	 * module, preventing both of them from being loaded at the same
	 * time. */
	dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv);
	if (!dp_stp_sap) {
		printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
		return -EADDRINUSE;
	}

	err = flow_init();
	if (err)
		goto error;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_flow_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_flow_exit:
	flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	flow_exit();
	br_handle_frame_hook = NULL;
	llc_sap_put(dp_stp_sap);
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");